/*
 * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <soc/tegra/ivc.h>

#define TEGRA_IVC_ALIGN 64

/*
 * IVC channel reset protocol.
 *
 * Each end uses its tx_channel.state to indicate its synchronization state.
 */
enum tegra_ivc_state {
	/*
	 * This value is zero for backwards compatibility with services that
	 * assume channels to be initially zeroed. Such channels are in an
	 * initially valid state, but cannot be asynchronously reset, and must
	 * maintain a valid state at all times.
	 *
	 * The transmitting end can enter the established state from the sync
	 * or ack state when it observes the receiving endpoint in the ack or
	 * established state, indicating that it has cleared the counters in
	 * our rx_channel.
	 */
	TEGRA_IVC_STATE_ESTABLISHED = 0,

	/*
	 * If an endpoint is observed in the sync state, the remote endpoint is
	 * allowed to clear the counters it owns asynchronously with respect to
	 * the current endpoint. Therefore, the current endpoint is no longer
	 * allowed to communicate.
	 */
	TEGRA_IVC_STATE_SYNC,

	/*
	 * When the transmitting end observes the receiving end in the sync
	 * state, it can clear the w_count and r_count and transition to the
	 * ack state. If the remote endpoint observes us in the ack state, it
	 * can return to the established state once it has cleared its
	 * counters.
	 */
	TEGRA_IVC_STATE_ACK
};
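/*
 * A minimal sketch (illustrative only, hence never compiled) of how a
 * hypothetical driver might drive this reset protocol from its doorbell
 * interrupt handler. Only tegra_ivc_reset() and tegra_ivc_notified(),
 * both defined below, are real; the handler and its wiring are assumed.
 */
#if 0
static irqreturn_t example_ivc_doorbell(int irq, void *data)
{
	struct tegra_ivc *ivc = data;

	/*
	 * tegra_ivc_notified() advances the SYNC -> ACK -> ESTABLISHED
	 * handshake. It returns -EAGAIN while the handshake is still in
	 * progress and 0 once the channel can carry frames.
	 */
	if (tegra_ivc_notified(ivc) == 0) {
		/* channel established: process incoming frames here */
	}

	return IRQ_HANDLED;
}
#endif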
/*
 * This structure is divided into two cache-aligned parts: the first is only
 * written through the tx.channel pointer, while the second is only written
 * through the rx.channel pointer. This delineates ownership of the cache
 * lines, which is critical to performance and necessary in non-cache coherent
 * implementations.
 */
struct tegra_ivc_header {
	union {
		struct {
			/* fields owned by the transmitting end */
			u32 count;
			u32 state;
		};

		u8 pad[TEGRA_IVC_ALIGN];
	} tx;

	union {
		/* fields owned by the receiving end */
		u32 count;
		u8 pad[TEGRA_IVC_ALIGN];
	} rx;
};

static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
{
	if (!ivc->peer)
		return;

	dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
				DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
{
	if (!ivc->peer)
		return;

	dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
				   DMA_TO_DEVICE);
}

static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
				   struct tegra_ivc_header *header)
{
	/*
	 * This function performs multiple checks on the same values with
	 * security implications, so create snapshots with ACCESS_ONCE() to
	 * ensure that these checks use the same values.
	 */
	u32 tx = ACCESS_ONCE(header->tx.count);
	u32 rx = ACCESS_ONCE(header->rx.count);

	/*
	 * Perform an over-full check to prevent denial of service attacks
	 * where a server could be easily fooled into believing that there's
	 * an extremely large number of frames ready, since receivers are not
	 * expected to check for full or over-full conditions.
	 *
	 * Although the channel isn't empty, this is an invalid case caused by
	 * a potentially malicious peer, so returning empty is safer, because
	 * it gives the impression that the channel has gone silent.
	 */
	if (tx - rx > ivc->num_frames)
		return true;

	return tx == rx;
}
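/*
 * Illustrative sketch (never compiled) of why the unsigned subtraction
 * above is safe across counter wrap-around: the counters are free-running
 * u32 values that are never reduced modulo num_frames, so their difference
 * is computed modulo 2^32. The values below are hypothetical, for a
 * channel with num_frames == 4.
 */
#if 0
static void example_counter_wrap(void)
{
	u32 tx = 0xfffffffe, rx = 0xfffffffc;

	WARN_ON(tx - rx != 2);	/* 2 frames queued */

	tx += 3;		/* tx wraps past zero to 0x00000001 */

	WARN_ON(tx - rx != 5);	/* still correct: 5 > num_frames, so the
				 * over-full check above treats this as
				 * empty (and tegra_ivc_full() as full) */
}
#endif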
static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
				  struct tegra_ivc_header *header)
{
	u32 tx = ACCESS_ONCE(header->tx.count);
	u32 rx = ACCESS_ONCE(header->rx.count);

	/*
	 * Invalid cases where the counters indicate that the queue is over
	 * capacity also appear full.
	 */
	return tx - rx >= ivc->num_frames;
}

static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
				      struct tegra_ivc_header *header)
{
	u32 tx = ACCESS_ONCE(header->tx.count);
	u32 rx = ACCESS_ONCE(header->rx.count);

	/*
	 * This function isn't expected to be used in scenarios where an
	 * over-full situation can lead to denial of service attacks. See the
	 * comment in tegra_ivc_empty() for an explanation about special
	 * over-full considerations.
	 */
	return tx - rx;
}

static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
	ACCESS_ONCE(ivc->tx.channel->tx.count) =
		ACCESS_ONCE(ivc->tx.channel->tx.count) + 1;

	if (ivc->tx.position == ivc->num_frames - 1)
		ivc->tx.position = 0;
	else
		ivc->tx.position++;
}

static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
{
	ACCESS_ONCE(ivc->rx.channel->rx.count) =
		ACCESS_ONCE(ivc->rx.channel->rx.count) + 1;

	if (ivc->rx.position == ivc->num_frames - 1)
		ivc->rx.position = 0;
	else
		ivc->rx.position++;
}
static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

	/*
	 * tx.channel->state is set locally, so it is not synchronized with
	 * state from the remote peer. The remote peer cannot reset its
	 * transmit counters until we've acknowledged its synchronization
	 * request, so no additional synchronization is required because an
	 * asynchronous transition of rx.channel->state to
	 * TEGRA_IVC_STATE_ACK is not allowed.
	 */
	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	/*
	 * Avoid unnecessary invalidations when performing repeated accesses
	 * to an IVC channel by checking the old queue pointers first.
	 *
	 * Synchronization is only necessary when these pointers indicate
	 * empty or full.
	 */
	if (!tegra_ivc_empty(ivc, ivc->rx.channel))
		return 0;

	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);

	if (tegra_ivc_empty(ivc, ivc->rx.channel))
		return -ENOSPC;

	return 0;
}

static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);

	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	if (!tegra_ivc_full(ivc, ivc->tx.channel))
		return 0;

	tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);

	if (tegra_ivc_full(ivc, ivc->tx.channel))
		return -ENOSPC;

	return 0;
}

static void *tegra_ivc_frame_virt(struct tegra_ivc *ivc,
				  struct tegra_ivc_header *header,
				  unsigned int frame)
{
	if (WARN_ON(frame >= ivc->num_frames))
		return ERR_PTR(-EINVAL);

	return (void *)(header + 1) + ivc->frame_size * frame;
}

static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
					      dma_addr_t phys,
					      unsigned int frame)
{
	unsigned long offset;

	offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;

	return phys + offset;
}
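/*
 * Worked example of the addressing above, using a hypothetical channel with
 * 64-byte frames: the header occupies two TEGRA_IVC_ALIGN-sized unions, so
 * sizeof(struct tegra_ivc_header) == 128 and frame 0 starts immediately
 * after it. Hence
 *
 *   tegra_ivc_frame_phys(ivc, phys, 2) == phys + 128 + 2 * 64 == phys + 256
 *
 * and tegra_ivc_frame_virt() performs the equivalent computation on the CPU
 * mapping via the (header + 1) base pointer.
 */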
static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
					      dma_addr_t phys,
					      unsigned int frame,
					      unsigned int offset,
					      size_t size)
{
	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
		return;

	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

	dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
					 dma_addr_t phys,
					 unsigned int frame,
					 unsigned int offset,
					 size_t size)
{
	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
		return;

	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

	dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
}

/* directly peek at the next frame rx'ed */
void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc)
{
	int err;

	if (WARN_ON(ivc == NULL))
		return ERR_PTR(-EINVAL);

	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return ERR_PTR(err);

	/*
	 * Order observation of ivc->rx.position potentially indicating new
	 * data before data read.
	 */
	smp_rmb();

	tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
				   ivc->frame_size);

	return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position);
}
EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);
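/*
 * A minimal sketch (illustrative only, never compiled) of the receive path,
 * assuming a hypothetical caller with an established channel and a size no
 * larger than the channel's frame_size. tegra_ivc_read_advance() is defined
 * just below.
 */
#if 0
static int example_receive(struct tegra_ivc *ivc, void *buffer, size_t size)
{
	void *frame;

	frame = tegra_ivc_read_get_next_frame(ivc);
	if (IS_ERR(frame))
		return PTR_ERR(frame);	/* -ENOSPC: empty, -ECONNRESET: reset */

	memcpy(buffer, frame, size);

	/* release the frame back to the transmitter */
	return tegra_ivc_read_advance(ivc);
}
#endif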
int tegra_ivc_read_advance(struct tegra_ivc *ivc)
{
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	int err;

	/*
	 * No read barriers or synchronization here: the caller is expected to
	 * have already observed the channel non-empty. This check is just to
	 * catch programming errors.
	 */
	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return err;

	tegra_ivc_advance_rx(ivc);

	tegra_ivc_flush(ivc, ivc->rx.phys + rx);

	/*
	 * Ensure our write to ivc->rx.position occurs before our read from
	 * ivc->tx.position.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from full to non-full. The available
	 * count can only asynchronously increase, so the worst possible
	 * side-effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);

	if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_read_advance);

/* directly poke at the next frame to be tx'ed */
void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc)
{
	int err;

	err = tegra_ivc_check_write(ivc);
	if (err < 0)
		return ERR_PTR(err);

	return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position);
}
EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);

/* advance the tx buffer */
int tegra_ivc_write_advance(struct tegra_ivc *ivc)
{
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	int err;

	err = tegra_ivc_check_write(ivc);
	if (err < 0)
		return err;

	tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
			      ivc->frame_size);

	/*
	 * Order any possible stores to the frame before update of
	 * ivc->tx.position.
	 */
	smp_wmb();

	tegra_ivc_advance_tx(ivc);
	tegra_ivc_flush(ivc, ivc->tx.phys + tx);

	/*
	 * Ensure our write to ivc->tx.position occurs before our read from
	 * ivc->rx.position.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from empty to non-empty. The available
	 * count can only asynchronously decrease, so the worst possible
	 * side-effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);

	if (tegra_ivc_available(ivc, ivc->tx.channel) == 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_write_advance);
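/*
 * A minimal sketch (illustrative only, never compiled) of the transmit path,
 * mirroring the receive example above; the caller and size are hypothetical.
 */
#if 0
static int example_send(struct tegra_ivc *ivc, const void *buffer, size_t size)
{
	void *frame;

	frame = tegra_ivc_write_get_next_frame(ivc);
	if (IS_ERR(frame))
		return PTR_ERR(frame);	/* -ENOSPC: full, -ECONNRESET: reset */

	memcpy(frame, buffer, size);

	/* publish the frame and notify the receiver if it was waiting */
	return tegra_ivc_write_advance(ivc);
}
#endif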
void tegra_ivc_reset(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

	ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC;
	tegra_ivc_flush(ivc, ivc->tx.phys + offset);
	ivc->notify(ivc, ivc->notify_data);
}
EXPORT_SYMBOL(tegra_ivc_reset);

/*
 * =======================================================
 *  IVC State Transition Table - see tegra_ivc_notified()
 * =======================================================
 *
 *	local	remote	action
 *	-----	------	-----------------------------------
 *	SYNC	EST	<none>
 *	SYNC	ACK	reset counters; move to EST; notify
 *	SYNC	SYNC	reset counters; move to ACK; notify
 *	ACK	EST	move to EST; notify
 *	ACK	ACK	move to EST; notify
 *	ACK	SYNC	reset counters; move to ACK; notify
 *	EST	EST	<none>
 *	EST	ACK	<none>
 *	EST	SYNC	reset counters; move to ACK; notify
 *
 * =======================================================
 */

int tegra_ivc_notified(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
	enum tegra_ivc_state state;

	/* Copy the receiver's state out of shared memory. */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
	state = ACCESS_ONCE(ivc->rx.channel->tx.state);

	if (state == TEGRA_IVC_STATE_SYNC) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * Order observation of TEGRA_IVC_STATE_SYNC before stores
		 * clearing tx.channel.
		 */
		smp_rmb();

		/*
		 * Reset tx.channel counters. The remote end is in the SYNC
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx.channel->tx.count = 0;
		ivc->rx.channel->rx.count = 0;

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		smp_wmb();

		/*
		 * Move to ACK state. We have just cleared our counters, so it
		 * is now safe for the remote end to start using these values.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC &&
		   state == TEGRA_IVC_STATE_ACK) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * Order observation of TEGRA_IVC_STATE_ACK before stores
		 * clearing tx.channel.
		 */
		smp_rmb();

		/*
		 * Reset tx.channel counters. The remote end is in the ACK
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx.channel->tx.count = 0;
		ivc->rx.channel->rx.count = 0;

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		smp_wmb();

		/*
		 * Move to ESTABLISHED state. We know that the remote end has
		 * already cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * At this point, we have observed the peer to be in either
		 * the ACK or ESTABLISHED state. Next, order observation of
		 * peer state before storing to tx.channel.
		 */
		smp_rmb();

		/*
		 * Move to ESTABLISHED state. We know that we have previously
		 * cleared our counters, and we know that the remote end has
		 * cleared its counters, so it is safe to start writing/reading
		 * on this channel.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else {
		/*
		 * There is no need to handle any further action. Either the
		 * channel is already fully established, or we are waiting for
		 * the remote end to catch up with our current state. Refer
		 * to the diagram in "IVC State Transition Table" above.
		 */
	}

	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -EAGAIN;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_notified);

size_t tegra_ivc_align(size_t size)
{
	return ALIGN(size, TEGRA_IVC_ALIGN);
}
EXPORT_SYMBOL(tegra_ivc_align);

unsigned tegra_ivc_total_queue_size(unsigned queue_size)
{
	if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
		pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
		       __func__, queue_size, TEGRA_IVC_ALIGN);
		return 0;
	}

	return queue_size + sizeof(struct tegra_ivc_header);
}
EXPORT_SYMBOL(tegra_ivc_total_queue_size);
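/*
 * Worked example of the sizing above, for a hypothetical channel with
 * 16 frames of 128 bytes each:
 *
 *   tegra_ivc_align(128)             == 128 (already 64-byte aligned)
 *   16 * 128                         == 2048 bytes of frame data
 *   tegra_ivc_total_queue_size(2048) == 2048 + sizeof(header) == 2176
 *
 * Each direction needs one such region: a 64-byte-aligned header followed
 * by num_frames frames, so a full channel pair occupies two independent
 * 2176-byte regions.
 */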
static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
				  unsigned int num_frames, size_t frame_size)
{
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
				 TEGRA_IVC_ALIGN));

	if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
		pr_err("num_frames * frame_size overflows\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
		pr_err("frame size not adequately aligned: %zu\n", frame_size);
		return -EINVAL;
	}

	/*
	 * The headers must at least be aligned enough for counters
	 * to be accessed atomically.
	 */
	if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", rx);
		return -EINVAL;
	}

	if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", tx);
		return -EINVAL;
	}

	if (rx < tx) {
		if (rx + frame_size * num_frames > tx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       rx, frame_size * num_frames, tx);
			return -EINVAL;
		}
	} else {
		if (tx + frame_size * num_frames > rx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       tx, frame_size * num_frames, rx);
			return -EINVAL;
		}
	}

	return 0;
}
int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
		   dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
		   unsigned int num_frames, size_t frame_size,
		   void (*notify)(struct tegra_ivc *ivc, void *data),
		   void *data)
{
	size_t queue_size;
	int err;

	if (WARN_ON(!ivc || !notify))
		return -EINVAL;

	/*
	 * All sizes that can be returned by communication functions should
	 * fit in an int.
	 */
	if (frame_size > INT_MAX)
		return -E2BIG;

	err = tegra_ivc_check_params((unsigned long)rx, (unsigned long)tx,
				     num_frames, frame_size);
	if (err < 0)
		return err;

	queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);

	if (peer) {
		ivc->rx.phys = dma_map_single(peer, rx, queue_size,
					      DMA_BIDIRECTIONAL);
		if (ivc->rx.phys == DMA_ERROR_CODE)
			return -ENOMEM;

		ivc->tx.phys = dma_map_single(peer, tx, queue_size,
					      DMA_BIDIRECTIONAL);
		if (ivc->tx.phys == DMA_ERROR_CODE) {
			dma_unmap_single(peer, ivc->rx.phys, queue_size,
					 DMA_BIDIRECTIONAL);
			return -ENOMEM;
		}
	} else {
		ivc->rx.phys = rx_phys;
		ivc->tx.phys = tx_phys;
	}

	ivc->rx.channel = rx;
	ivc->tx.channel = tx;
	ivc->peer = peer;
	ivc->notify = notify;
	ivc->notify_data = data;
	ivc->frame_size = frame_size;
	ivc->num_frames = num_frames;

	/*
	 * These values aren't necessarily correct until the channel has been
	 * reset.
	 */
	ivc->tx.position = 0;
	ivc->rx.position = 0;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_init);

void tegra_ivc_cleanup(struct tegra_ivc *ivc)
{
	if (ivc->peer) {
		size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
							 ivc->frame_size);

		dma_unmap_single(ivc->peer, ivc->rx.phys, size,
				 DMA_BIDIRECTIONAL);
		dma_unmap_single(ivc->peer, ivc->tx.phys, size,
				 DMA_BIDIRECTIONAL);
	}
}
EXPORT_SYMBOL(tegra_ivc_cleanup);
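/*
 * End-to-end setup sketch (illustrative only, never compiled), assuming a
 * hypothetical driver with two suitably aligned shared-memory regions sized
 * via tegra_ivc_total_queue_size() and a doorbell-style notify callback.
 * The notify body and the zero phys arguments (ignored when a peer device
 * is supplied for DMA mapping) are assumptions for the example.
 */
#if 0
static void example_notify(struct tegra_ivc *ivc, void *data)
{
	/* ring the peer's doorbell, e.g. write to a mailbox register */
}

static int example_setup(struct tegra_ivc *ivc, struct device *peer,
			 void *rx, void *tx, unsigned int num_frames,
			 size_t frame_size)
{
	int err;

	err = tegra_ivc_init(ivc, peer, rx, 0, tx, 0, num_frames,
			     frame_size, example_notify, NULL);
	if (err < 0)
		return err;

	/*
	 * Start the reset protocol; the peer's notifications then drive
	 * tegra_ivc_notified() until the channel is established.
	 */
	tegra_ivc_reset(ivc);

	return 0;
}
#endif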