// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
 */

#include <soc/tegra/ivc.h>

#define TEGRA_IVC_ALIGN 64

/*
 * IVC channel reset protocol.
 *
 * Each end uses the state field it owns (tx.channel->tx.state) to indicate
 * its synchronization state.
 */
enum tegra_ivc_state {
	/*
	 * This value is zero for backwards compatibility with services that
	 * assume channels to be initially zeroed. Such channels are in an
	 * initially valid state, but cannot be asynchronously reset, and must
	 * maintain a valid state at all times.
	 *
	 * The transmitting end can enter the established state from the sync
	 * or ack state when it observes the receiving endpoint in the ack or
	 * established state, indicating that it has cleared the counters in
	 * our rx.channel.
	 */
	TEGRA_IVC_STATE_ESTABLISHED = 0,

	/*
	 * If an endpoint is observed in the sync state, the remote endpoint is
	 * allowed to clear the counters it owns asynchronously with respect to
	 * the current endpoint. Therefore, the current endpoint is no longer
	 * allowed to communicate.
	 */
	TEGRA_IVC_STATE_SYNC,

	/*
	 * When the transmitting end observes the receiving end in the sync
	 * state, it can clear its tx.count and rx.count and transition to the
	 * ack state. If the remote endpoint observes us in the ack state, it
	 * can return to the established state once it has cleared its
	 * counters.
	 */
	TEGRA_IVC_STATE_ACK
};
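
/*
 * Illustrative handshake between two endpoints A and B that both start in
 * the SYNC state (a sketch of the protocol above, not additional API; the
 * full decision table precedes tegra_ivc_notified() below):
 *
 *   B observes A in SYNC:        clears its counters, moves to ACK, notifies
 *   A observes B in ACK:         clears its counters, moves to ESTABLISHED,
 *                                notifies
 *   B observes A in ESTABLISHED: moves to ESTABLISHED, notifies
 *
 * From that point on, both ends exchange frames normally.
 */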

/*
 * This structure is divided into two cache-aligned parts, the first of which
 * is only written through the tx.channel pointer, while the second is only
 * written through the rx.channel pointer. This delineates ownership of the
 * cache lines, which is critical to performance and necessary in
 * non-cache-coherent implementations.
 */
struct tegra_ivc_header {
	union {
		struct {
			/* fields owned by the transmitting end */
			u32 count;
			u32 state;
		};

		u8 pad[TEGRA_IVC_ALIGN];
	} tx;

	union {
		/* fields owned by the receiving end */
		u32 count;
		u8 pad[TEGRA_IVC_ALIGN];
	} rx;
};

static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
{
	if (!ivc->peer)
		return;

	dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
				DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
{
	if (!ivc->peer)
		return;

	dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
				   DMA_TO_DEVICE);
}

static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
				   struct tegra_ivc_header *header)
{
	/*
	 * This function performs multiple checks on the same values with
	 * security implications, so create snapshots with READ_ONCE() to
	 * ensure that these checks use the same values.
	 */
	u32 tx = READ_ONCE(header->tx.count);
	u32 rx = READ_ONCE(header->rx.count);

	/*
	 * Perform an over-full check to prevent denial of service attacks
	 * where a server could be easily fooled into believing that there's
	 * an extremely large number of frames ready, since receivers are not
	 * expected to check for full or over-full conditions.
	 *
	 * Although the channel isn't empty, this is an invalid case caused by
	 * a potentially malicious peer, so returning empty is safer, because
	 * it gives the impression that the channel has gone silent.
	 */
	if (tx - rx > ivc->num_frames)
		return true;

	return tx == rx;
}
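
/*
 * The counters are free-running u32 values, so the tx - rx difference above
 * stays correct across wraparound. A worked example, assuming num_frames = 4
 * (values illustrative only):
 *
 *   tx = 2,          rx = 0          -> tx - rx = 2: two frames pending
 *   tx = 0x00000001, rx = 0xffffffff -> tx - rx = 2: same, despite the wrap
 *   tx = 9,          rx = 0          -> tx - rx = 9 > num_frames: invalid,
 *                                       reported as empty here and as full
 *                                       by tegra_ivc_full() below
 */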

static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
				  struct tegra_ivc_header *header)
{
	u32 tx = READ_ONCE(header->tx.count);
	u32 rx = READ_ONCE(header->rx.count);

	/*
	 * Invalid cases where the counters indicate that the queue is over
	 * capacity also appear full.
	 */
	return tx - rx >= ivc->num_frames;
}

static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
				      struct tegra_ivc_header *header)
{
	u32 tx = READ_ONCE(header->tx.count);
	u32 rx = READ_ONCE(header->rx.count);

	/*
	 * This function isn't expected to be used in scenarios where an
	 * over-full situation can lead to denial of service attacks. See the
	 * comment in tegra_ivc_empty() for an explanation about special
	 * over-full considerations.
	 */
	return tx - rx;
}

static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
	WRITE_ONCE(ivc->tx.channel->tx.count,
		   READ_ONCE(ivc->tx.channel->tx.count) + 1);

	if (ivc->tx.position == ivc->num_frames - 1)
		ivc->tx.position = 0;
	else
		ivc->tx.position++;
}

static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
{
	WRITE_ONCE(ivc->rx.channel->rx.count,
		   READ_ONCE(ivc->rx.channel->rx.count) + 1);

	if (ivc->rx.position == ivc->num_frames - 1)
		ivc->rx.position = 0;
	else
		ivc->rx.position++;
}
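
/*
 * Note the split bookkeeping in the two helpers above: the count fields in
 * shared memory are free-running, while the position fields are local ring
 * indices. For example (illustrative), after five writes on a four-frame
 * channel, tx.channel->tx.count is 5 but ivc->tx.position has wrapped to 1.
 */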

static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

	/*
	 * tx.channel->state is set locally, so it is not synchronized with
	 * state from the remote peer. The remote peer cannot reset its
	 * transmit counters until we've acknowledged its synchronization
	 * request, so no additional synchronization is required because an
	 * asynchronous transition of rx.channel->state to
	 * TEGRA_IVC_STATE_ACK is not allowed.
	 */
	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	/*
	 * Avoid unnecessary invalidations when performing repeated accesses
	 * to an IVC channel by checking the old queue pointers first.
	 *
	 * Synchronization is only necessary when these pointers indicate
	 * empty or full.
	 */
	if (!tegra_ivc_empty(ivc, ivc->rx.channel))
		return 0;

	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);

	if (tegra_ivc_empty(ivc, ivc->rx.channel))
		return -ENOSPC;

	return 0;
}

static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);

	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	if (!tegra_ivc_full(ivc, ivc->tx.channel))
		return 0;

	tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);

	if (tegra_ivc_full(ivc, ivc->tx.channel))
		return -ENOSPC;

	return 0;
}

static void *tegra_ivc_frame_virt(struct tegra_ivc *ivc,
				  struct tegra_ivc_header *header,
				  unsigned int frame)
{
	if (WARN_ON(frame >= ivc->num_frames))
		return ERR_PTR(-EINVAL);

	return (void *)(header + 1) + ivc->frame_size * frame;
}

static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
					      dma_addr_t phys,
					      unsigned int frame)
{
	unsigned long offset;

	offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;

	return phys + offset;
}
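
/*
 * The two helpers above assume the following layout of each queue, as set up
 * by tegra_ivc_init() (a sketch; actual sizes depend on the frame_size and
 * num_frames parameters):
 *
 *   phys + 0                                  struct tegra_ivc_header
 *   phys + sizeof(header) + 0 * frame_size    frame 0
 *   phys + sizeof(header) + 1 * frame_size    frame 1
 *   ...
 *
 * e.g. with 128-byte frames, frame 2 starts at phys + sizeof(header) + 256.
 */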

static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
					      dma_addr_t phys,
					      unsigned int frame,
					      unsigned int offset,
					      size_t size)
{
	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
		return;

	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

	dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
					 dma_addr_t phys,
					 unsigned int frame,
					 unsigned int offset,
					 size_t size)
{
	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
		return;

	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

	dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
}

/* directly peek at the next frame rx'ed */
void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc)
{
	int err;

	if (WARN_ON(ivc == NULL))
		return ERR_PTR(-EINVAL);

	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return ERR_PTR(err);

	/*
	 * Order observation of ivc->rx.position potentially indicating new
	 * data before data read.
	 */
	smp_rmb();

	tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
				   ivc->frame_size);

	return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position);
}
EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);

int tegra_ivc_read_advance(struct tegra_ivc *ivc)
{
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	int err;

	/*
	 * No read barriers or synchronization here: the caller is expected to
	 * have already observed the channel non-empty. This check is just to
	 * catch programming errors.
	 */
	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return err;

	tegra_ivc_advance_rx(ivc);

	tegra_ivc_flush(ivc, ivc->rx.phys + rx);

	/*
	 * Ensure our write to ivc->rx.position occurs before our read from
	 * ivc->tx.position.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from full to non-full. The available
	 * count can only asynchronously increase, so the worst possible
	 * side-effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);

	if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_read_advance);
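
/*
 * Typical receive loop built on the two functions above (a sketch;
 * my_process() is a placeholder for the caller's frame handler):
 *
 *	void *frame = tegra_ivc_read_get_next_frame(ivc);
 *
 *	if (!IS_ERR(frame)) {
 *		my_process(frame, ivc->frame_size);
 *		tegra_ivc_read_advance(ivc);
 *	}
 *
 * The frame contents must not be used after tegra_ivc_read_advance()
 * returns, since the transmitter is then free to reuse the frame.
 */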

/* directly poke at the next frame to be tx'ed */
void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc)
{
	int err;

	err = tegra_ivc_check_write(ivc);
	if (err < 0)
		return ERR_PTR(err);

	return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position);
}
EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);

/* advance the tx buffer */
int tegra_ivc_write_advance(struct tegra_ivc *ivc)
{
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	int err;

	err = tegra_ivc_check_write(ivc);
	if (err < 0)
		return err;

	tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
			      ivc->frame_size);

	/*
	 * Order any possible stores to the frame before update of
	 * ivc->tx.position.
	 */
	smp_wmb();

	tegra_ivc_advance_tx(ivc);
	tegra_ivc_flush(ivc, ivc->tx.phys + tx);

	/*
	 * Ensure our write to ivc->tx.position occurs before our read from
	 * ivc->rx.position.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from empty to non-empty. The available
	 * count can only asynchronously decrease, so the worst possible
	 * side-effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);

	if (tegra_ivc_available(ivc, ivc->tx.channel) == 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_write_advance);
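
/*
 * The transmit path mirrors the receive sketch above (my_fill() is a
 * placeholder for the caller's frame producer):
 *
 *	void *frame = tegra_ivc_write_get_next_frame(ivc);
 *
 *	if (!IS_ERR(frame)) {
 *		my_fill(frame, ivc->frame_size);
 *		tegra_ivc_write_advance(ivc);
 *	}
 */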

void tegra_ivc_reset(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

	ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC;
	tegra_ivc_flush(ivc, ivc->tx.phys + offset);
	ivc->notify(ivc, ivc->notify_data);
}
EXPORT_SYMBOL(tegra_ivc_reset);

/*
 * =======================================================
 *  IVC State Transition Table - see tegra_ivc_notified()
 * =======================================================
 *
 *	local	remote	action
 *	-----	------	-----------------------------------
 *	SYNC	EST	<none>
 *	SYNC	ACK	reset counters; move to EST; notify
 *	SYNC	SYNC	reset counters; move to ACK; notify
 *	ACK	EST	move to EST; notify
 *	ACK	ACK	move to EST; notify
 *	ACK	SYNC	reset counters; move to ACK; notify
 *	EST	EST	<none>
 *	EST	ACK	<none>
 *	EST	SYNC	reset counters; move to ACK; notify
 *
 * ===============================================================
 */
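
/*
 * A channel is typically (re)established by calling tegra_ivc_reset() and
 * then feeding peer notifications into tegra_ivc_notified() until it no
 * longer returns -EAGAIN. An illustrative sketch, in which
 * wait_for_peer_notification() stands in for however the surrounding driver
 * sleeps on its doorbell or mailbox interrupt:
 *
 *	tegra_ivc_reset(ivc);
 *
 *	while (tegra_ivc_notified(ivc))
 *		wait_for_peer_notification();
 */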

int tegra_ivc_notified(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
	enum tegra_ivc_state state;

	/* Copy the receiver's state out of shared memory. */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
	state = READ_ONCE(ivc->rx.channel->tx.state);

	if (state == TEGRA_IVC_STATE_SYNC) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * Order observation of TEGRA_IVC_STATE_SYNC before stores
		 * clearing tx.channel.
		 */
		smp_rmb();

		/*
		 * Reset tx.channel counters. The remote end is in the SYNC
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx.channel->tx.count = 0;
		ivc->rx.channel->rx.count = 0;

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		smp_wmb();

		/*
		 * Move to ACK state. We have just cleared our counters, so it
		 * is now safe for the remote end to start using these values.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC &&
		   state == TEGRA_IVC_STATE_ACK) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * Order observation of TEGRA_IVC_STATE_ACK before stores
		 * clearing tx.channel.
		 */
		smp_rmb();

		/*
		 * Reset tx.channel counters. The remote end is in the ACK
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx.channel->tx.count = 0;
		ivc->rx.channel->rx.count = 0;

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		smp_wmb();

		/*
		 * Move to ESTABLISHED state. We know that the remote end has
		 * already cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * At this point, we have observed the peer to be in either
		 * the ACK or ESTABLISHED state. Next, order observation of
		 * peer state before storing to tx.channel.
		 */
		smp_rmb();

		/*
		 * Move to ESTABLISHED state. We know that we have previously
		 * cleared our counters, and we know that the remote end has
		 * cleared its counters, so it is safe to start writing/reading
		 * on this channel.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else {
		/*
		 * There is no need to handle any further action. Either the
		 * channel is already fully established, or we are waiting for
		 * the remote end to catch up with our current state. Refer
		 * to the diagram in "IVC State Transition Table" above.
		 */
	}

	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -EAGAIN;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_notified);

size_t tegra_ivc_align(size_t size)
{
	return ALIGN(size, TEGRA_IVC_ALIGN);
}
EXPORT_SYMBOL(tegra_ivc_align);

unsigned tegra_ivc_total_queue_size(unsigned queue_size)
{
	if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
		pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
		       __func__, queue_size, TEGRA_IVC_ALIGN);
		return 0;
	}

	return queue_size + sizeof(struct tegra_ivc_header);
}
EXPORT_SYMBOL(tegra_ivc_total_queue_size);
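
/*
 * Worked example, assuming 16 frames of 128 bytes each: the queue body is
 * 16 * 128 = 2048 bytes, and tegra_ivc_total_queue_size(2048) returns
 * 2048 + sizeof(struct tegra_ivc_header) = 2176 bytes per direction, since
 * the header occupies two 64-byte (TEGRA_IVC_ALIGN) halves.
 */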

static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
				  unsigned int num_frames, size_t frame_size)
{
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
				 TEGRA_IVC_ALIGN));

	if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
		pr_err("num_frames * frame_size overflows\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
		pr_err("frame size not adequately aligned: %zu\n", frame_size);
		return -EINVAL;
	}

	/*
	 * The headers must at least be aligned enough for counters
	 * to be accessed atomically.
	 */
	if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", rx);
		return -EINVAL;
	}

	if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", tx);
		return -EINVAL;
	}

	if (rx < tx) {
		if (rx + frame_size * num_frames > tx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       rx, frame_size * num_frames, tx);
			return -EINVAL;
		}
	} else {
		if (tx + frame_size * num_frames > rx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       tx, frame_size * num_frames, rx);
			return -EINVAL;
		}
	}

	return 0;
}

int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
		   dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
		   unsigned int num_frames, size_t frame_size,
		   void (*notify)(struct tegra_ivc *ivc, void *data),
		   void *data)
{
	size_t queue_size;
	int err;

	if (WARN_ON(!ivc || !notify))
		return -EINVAL;

	/*
	 * All sizes that can be returned by communication functions should
	 * fit in an int.
	 */
	if (frame_size > INT_MAX)
		return -E2BIG;

	err = tegra_ivc_check_params((unsigned long)rx, (unsigned long)tx,
				     num_frames, frame_size);
	if (err < 0)
		return err;

	queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);

	if (peer) {
		ivc->rx.phys = dma_map_single(peer, rx, queue_size,
					      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(peer, ivc->rx.phys))
			return -ENOMEM;

		ivc->tx.phys = dma_map_single(peer, tx, queue_size,
					      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(peer, ivc->tx.phys)) {
			dma_unmap_single(peer, ivc->rx.phys, queue_size,
					 DMA_BIDIRECTIONAL);
			return -ENOMEM;
		}
	} else {
		ivc->rx.phys = rx_phys;
		ivc->tx.phys = tx_phys;
	}

	ivc->rx.channel = rx;
	ivc->tx.channel = tx;
	ivc->peer = peer;
	ivc->notify = notify;
	ivc->notify_data = data;
	ivc->frame_size = frame_size;
	ivc->num_frames = num_frames;

	/*
	 * These values aren't necessarily correct until the channel has been
	 * reset.
	 */
	ivc->tx.position = 0;
	ivc->rx.position = 0;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_init);

void tegra_ivc_cleanup(struct tegra_ivc *ivc)
{
	if (ivc->peer) {
		size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
							 ivc->frame_size);

		dma_unmap_single(ivc->peer, ivc->rx.phys, size,
				 DMA_BIDIRECTIONAL);
		dma_unmap_single(ivc->peer, ivc->tx.phys, size,
				 DMA_BIDIRECTIONAL);
	}
}
EXPORT_SYMBOL(tegra_ivc_cleanup);
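
/*
 * End-to-end setup sketch (illustrative only; the buffers, device and
 * doorbell mechanism are placeholders for what the calling driver provides):
 *
 *	static void my_notify(struct tegra_ivc *ivc, void *data)
 *	{
 *		ring_peer_doorbell(data);
 *	}
 *
 *	size_t size = tegra_ivc_total_queue_size(16 * 128);
 *
 *	// allocate DMA-able rx/tx buffers of that size, then:
 *	err = tegra_ivc_init(ivc, dev, rx, rx_phys, tx, tx_phys,
 *			     16, 128, my_notify, dev);
 *	if (!err)
 *		tegra_ivc_reset(ivc);
 *
 * Teardown is the reverse: call tegra_ivc_cleanup(ivc) once the channel is
 * no longer in use.
 */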